# Global chunk options: compact default figure size for this notebook.
knitr::opts_chunk$set(fig.width = 5, fig.height = 2.5)

# Packages: CARTO colour palettes, the tidyverse, and brms for Bayesian models.
library(rcartocolor)
library(tidyverse)
library(brms)

# Seven-colour "BurgYl" palette used for every plot below.
# NOTE(review): the name `palette` masks base::palette() for the session.
colour_theme <- "BurgYl"
palette <- carto_pal(7, colour_theme)

Notes

To model deeper conditionality we need interactions. An interaction is a kind of conditioning, a way of allowing parameters (really their posterior distributions) to be conditional on further aspects of the data. The simplest kind of interaction, a linear interaction, is built by extending the linear modeling strategy to parameters within the linear model. Multilevel models induce similar effects. Common sorts of multilevel models are essentially massive interaction models, in which estimates (intercepts and slopes) are conditional on clusters (person, genus, village, city, galaxy) in the data.

Building an Interaction

We’ll start by looking at the relationship between ruggedness and GDP. This relationship appears to be different in the continent of Africa.

# Load the `rugged` data from rethinking, then detach the package so it
# doesn't mask brms functions for the rest of the session (the source Rmd
# chunk for this figure does the same).
library(rethinking)
data(rugged)
detach(package:rethinking, unload = TRUE)

# `as_data_frame()` is deprecated; `as_tibble()` is the current replacement.
d <- as_tibble(rugged) %>% 
  mutate(log_gdp = log(rgdppc_2000),
         africa = ifelse(cont_africa == 1, "Africa", "Not Africa"))

# Log GDP against terrain ruggedness, faceted by continent membership.
# `na.rm = TRUE` silences warnings for nations with missing GDP.
d %>% 
  ggplot(aes(x = rugged, y = log_gdp)) +
  geom_smooth(method = lm, colour = palette[6], fill = palette[2], na.rm = TRUE) + 
  geom_point(colour = palette[6], na.rm = TRUE) + 
  facet_wrap(~ africa) +
  xlab("Ruggedness Index") + ylab("Log GDP") + 
  theme(text = element_text(family = "Courier", size = 12, colour = palette[7]),
        panel.background = element_rect(fill = alpha(palette[1], 1/4)),
        panel.grid = element_blank(),
        strip.background = element_rect(fill = palette[2])) +
  scale_colour_manual(values = palette[5:7]) +
  coord_cartesian(xlim = c(0, 5))

We can model this relationship directly using an interaction effect, and it's better to do so in this way.

  1. Parameters such as \(\sigma\) can learn across the whole sample. Splitting the data and training two regressions assumes a different variance between the two groups.
  2. You can acquire probability statements about the variable you split on.
  3. You can compare models more easily.
  4. Eventually, we can borrow data across categories when fitting multilevel models.
# Convert `cont_africa` to a factor so brms dummy-codes it (level "1" =
# Africa) in the design matrix.
dd <- d %>%
  mutate(cont_africa = as.factor(cont_africa))

# Linear model of log GDP on ruggedness, continent, and their interaction.
# Rows with missing log_gdp are dropped automatically (see the message in
# the captured output below).
# NOTE(review): recent brms versions require explicit bounds (`lb`/`ub`) for
# a uniform prior on sigma and will warn here — confirm the targeted version.
fit7.1 <- brm(log_gdp ~ 1 + rugged + cont_africa + rugged:cont_africa,
              data = dd, family = gaussian,
              prior = c(prior(normal(8, 100), class = Intercept),
                        prior(normal(0, 1), class = b),
                        prior(uniform(0, 10), class = sigma)),
              iter = 2000, warmup = 1000, chains = 4, cores = 4)
Rows containing NAs were excluded from the model.Compiling the C++ model
recompiling to avoid crashing R session
Start sampling
starting worker pid=71177 on localhost:11649 at 20:27:08.179
starting worker pid=71187 on localhost:11649 at 20:27:08.406
starting worker pid=71197 on localhost:11649 at 20:27:08.634
starting worker pid=71207 on localhost:11649 at 20:27:08.859

SAMPLING FOR MODEL '8a10fc11cced5cebfd5ecabd8ff4d9c6' NOW (CHAIN 1).
Chain 1: 
Chain 1: Gradient evaluation took 5.3e-05 seconds
Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.53 seconds.
Chain 1: Adjust your expectations accordingly!
Chain 1: 
Chain 1: 
Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 1: 
Chain 1:  Elapsed Time: 0.104774 seconds (Warm-up)
Chain 1:                0.103669 seconds (Sampling)
Chain 1:                0.208443 seconds (Total)
Chain 1: 

SAMPLING FOR MODEL '8a10fc11cced5cebfd5ecabd8ff4d9c6' NOW (CHAIN 2).
Chain 2: 
Chain 2: Gradient evaluation took 8.5e-05 seconds
Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.85 seconds.
Chain 2: Adjust your expectations accordingly!
Chain 2: 
Chain 2: 
Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 2: 
Chain 2:  Elapsed Time: 0.107247 seconds (Warm-up)
Chain 2:                0.102246 seconds (Sampling)
Chain 2:                0.209493 seconds (Total)
Chain 2: 

SAMPLING FOR MODEL '8a10fc11cced5cebfd5ecabd8ff4d9c6' NOW (CHAIN 3).
Chain 3: 
Chain 3: Gradient evaluation took 9.1e-05 seconds
Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.91 seconds.
Chain 3: Adjust your expectations accordingly!
Chain 3: 
Chain 3: 
Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 3: 
Chain 3:  Elapsed Time: 0.114844 seconds (Warm-up)
Chain 3:                0.120197 seconds (Sampling)
Chain 3:                0.235041 seconds (Total)
Chain 3: 

SAMPLING FOR MODEL '8a10fc11cced5cebfd5ecabd8ff4d9c6' NOW (CHAIN 4).
Chain 4: 
Chain 4: Gradient evaluation took 4.7e-05 seconds
Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.47 seconds.
Chain 4: Adjust your expectations accordingly!
Chain 4: 
Chain 4: 
Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 4: 
Chain 4:  Elapsed Time: 0.096713 seconds (Warm-up)
Chain 4:                0.100714 seconds (Sampling)
Chain 4:                0.197427 seconds (Total)
Chain 4: 
# Trace and density plots for each parameter, then the printed model summary.
plot(fit7.1)

fit7.1
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: log_gdp ~ 1 + rugged + cont_africa + rugged:cont_africa 
   Data: dd (Number of observations: 170) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Population-Level Effects: 
                    Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
Intercept               9.19      0.14     8.92     9.46       2736 1.00
rugged                 -0.19      0.08    -0.33    -0.04       2431 1.00
cont_africa1           -1.85      0.22    -2.29    -1.41       2410 1.00
rugged:cont_africa1     0.35      0.13     0.09     0.61       2321 1.00

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
sigma     0.95      0.05     0.86     1.06       4012 1.00

Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample 
is a crude measure of effective sample size, and Rhat is the potential 
scale reduction factor on split chains (at convergence, Rhat = 1).

This is the easy way to examine the effect of the interaction.

# Built-in visualisation of the rugged-by-continent interaction.
# NOTE(review): `marginal_effects()` was renamed `conditional_effects()` in
# brms >= 2.11 — confirm which brms version this notebook targets.
marginal_effects_plot <- plot(marginal_effects(fit7.1, effects = "rugged:cont_africa"), 
                              points = TRUE)

We’ll do it manually instead for instructive purposes.

# Counterfactual predictor grid: 100 ruggedness values, once for Africa
# (cont_africa = 1) and once for everywhere else (cont_africa = 0).
nd <- tibble(rugged = rep(seq(from = 0, to = 8, length.out = 100), 2),
             cont_africa = as.factor(c(rep(1, 100), rep(0, 100))))

# Posterior fitted values over the grid, plotted with 95% ribbons over the
# observed data.
fitted(fit7.1, newdata = nd) %>% 
  as_tibble() %>% 
  bind_cols(nd) %>% 
  mutate(
    cont_africa = ifelse(cont_africa == 1, "Africa", "Not Africa")) %>% 
  ggplot(data = ., aes(x = rugged, y = Estimate, 
                       ymin = Q2.5, ymax = Q97.5,
                       colour = cont_africa, fill = cont_africa)) +
    geom_ribbon(alpha = I(1/3), colour = "transparent") + 
    geom_line() +
    geom_jitter(data = dd, inherit.aes = FALSE, fill = palette[7],
                aes(x = rugged, y = log_gdp), alpha = I(2/3), na.rm = TRUE) +
    # Fixed labels: "Rugedness" typo, and the y axis shows log(GDP)
    # (the model's outcome is log_gdp), not raw GDP.
    xlab("Ruggedness Index") + ylab("Log GDP") +
    theme(text = element_text(family = "Courier", size = 12, colour = palette[7]),
          panel.background = element_rect(fill = alpha(palette[1], 1/4)),
          panel.grid = element_blank(),
          legend.position = c(0, 1),
          legend.justification = c(0, 1),
          legend.background = element_rect(fill = "transparent"),
          legend.title = element_blank(),
          strip.background = element_rect(fill = palette[2])) +
    scale_fill_manual(values = c(palette[3], palette[6])) +
    scale_colour_manual(values = c(palette[3], palette[6]))

# Slopes of ruggedness on log(GDP) implied by the model:
#   in Africa      (cont_africa == 1): b_rugged + b_rugged:cont_africa1
#   outside Africa (cont_africa == 0): b_rugged
# BUG FIX: the original had these two names swapped, which flipped the sign
# of `difference` relative to its axis label; the density plot below then
# swapped them back, masking the error. Both are corrected here.
post <- posterior_samples(fit7.1) %>% 
  mutate(ruggedness_in_africa = b_rugged + `b_rugged:cont_africa1`,
         ruggedness_outside_africa = b_rugged,
         difference = ruggedness_in_africa - ruggedness_outside_africa)

# Posterior of the slope difference (in Africa minus outside Africa);
# equals the interaction coefficient, centred well away from zero.
p1 <- post %>% 
  ggplot(aes(difference, 0)) +
  geom_halfeyeh(.width = c(.5, .95),
                fill = palette[6],
                colour = palette[6]) +
  geom_vline(xintercept = 0, linetype = 2, colour = palette[7]) +
  xlab("Difference in the effect of ruggedness on log(GDP) : In Africa vs Out Of Africa") +
  ylab("") +
  theme(text = element_text(family = "Courier", size = 10, colour = palette[7]),
          panel.background = element_rect(fill = alpha(palette[1], 1/4)),
          panel.grid = element_blank()) +
    scale_colour_manual(values = palette[5:7])
  
# Overlaid posterior densities of the two slopes, labelled correctly.
p2 <- post %>% 
  transmute(`In Africa` = ruggedness_in_africa, 
            `Outside Africa` = ruggedness_outside_africa) %>% 
  gather(Variable, Value) %>% 
  ggplot(aes(Value, fill = Variable)) +
    geom_density(alpha = I(.8), colour = "transparent") +
    xlab("The effect of ruggedness on GDP") +
    theme(text = element_text(family = "Courier", size = 10, colour = palette[7]),
          panel.background = element_rect(fill = alpha(palette[1], 1/4)),
          panel.grid = element_blank(),
          legend.position = c(1, 1),
          legend.justification = c(1, 1),
          legend.background = element_rect(fill = "transparent"),
          legend.title = element_blank()) +
    scale_fill_manual(values = c(palette[3], palette[6]))

gridExtra::grid.arrange(p2, p1)

Symmetry of the linear interaction

The interaction here has two equally valid phrasings: (1) how much does the influence of ruggedness (on GDP) depend upon whether the nation is in Africa? (2) how much does the influence of being in Africa (on GDP) depend upon ruggedness?

Continuous Interactions

In this section, we’ll build interactions between two continuous predictors, using the tulips data.

# Load the tulips data, then detach rethinking so it doesn't mask brms.
library(rethinking)
data(tulips)
# `as_data_frame()` is deprecated; `as_tibble()` is the current replacement.
d <- as_tibble(tulips)
detach(package:rethinking, unload = TRUE)

# Quick structural summary of the variables.
d %>% skimr::skim()
Skim summary statistics
 n obs: 27 
 n variables: 4 

── Variable type:factor ───────────────────────────────────────────────────────────────────
 variable missing complete  n n_unique              top_counts ordered
      bed       0       27 27        3 a: 9, b: 9, c: 9, NA: 0   FALSE

── Variable type:integer ──────────────────────────────────────────────────────────────────
 variable missing complete  n mean   sd p0 p25 p50 p75 p100     hist
    shade       0       27 27    2 0.83  1   1   2   3    3 ▇▁▁▇▁▁▁▇
    water       0       27 27    2 0.83  1   1   2   3    3 ▇▁▁▇▁▁▁▇

── Variable type:numeric ──────────────────────────────────────────────────────────────────
 variable missing complete  n   mean    sd p0   p25    p50   p75   p100     hist
   blooms       0       27 27 128.99 92.68  0 71.12 111.04 190.3 361.66 ▅▇▇▁▅▂▁▁
# Compare the main-effects (b7.6) and interaction (b7.7) models by LOOIC;
# both models are fit earlier in the full notebook (not shown in this excerpt).
loo(b7.6, b7.7)
             LOOIC   SE
b7.6        304.00 7.72
b7.7        294.07 7.91
b7.6 - b7.7   9.94 5.33

Let's look at the posterior summary.

# Rounded posterior summary for the interaction model b7.7.
posterior_summary(b7.7) %>% round(digits = 2)
              Estimate Est.Error    Q2.5   Q97.5
b_Intercept    -110.30     62.14 -229.47   12.85
b_water         161.23     28.44  105.00  218.19
b_shade          45.46     28.46   -9.77  100.17
b_water:shade   -43.92     12.99  -69.19  -17.81
sigma            50.14      7.58   37.83   67.13
lp__           -170.71      1.73 -174.94 -168.40

Almost all the weight went to the interaction model, b7.7.

# WAIC-based model weights; nearly all weight falls on the interaction model.
model_weights(b7.6, b7.7, weights = "waic")

Now let's try centering and scaling our predictors first.

# Standardize both predictors: subtract the mean AND divide by the sd.
# NOTE(review): the `_c` suffix and surrounding text say "centered";
# McElreath's original only subtracts the mean, and the plot later in this
# notebook uses breaks c(-1, 0, 1), which match centered-only values —
# confirm which transformation was intended.
dd <- d %>%
  mutate(shade_c = (shade - mean(shade)) / sd(shade),
         water_c = (water - mean(water)) / sd(water))

# Main-effects-only model of blooms on the transformed water and shade;
# adapt_delta raised to 0.9 to reduce divergent transitions.
b7.8 <-
  brm(data = dd, family = gaussian,
      blooms ~ 1 + water_c + shade_c,
      prior = c(prior(normal(130, 100), class = Intercept),
                prior(normal(0, 100), class = b),
                prior(cauchy(0, 10), class = sigma)),
      iter = 2000, warmup = 1000, chains = 4, cores = 4,
      control = list(adapt_delta = 0.9))
Compiling the C++ model
Start sampling
starting worker pid=72544 on localhost:11649 at 20:49:37.550
starting worker pid=72554 on localhost:11649 at 20:49:37.777
starting worker pid=72564 on localhost:11649 at 20:49:38.001
starting worker pid=72574 on localhost:11649 at 20:49:38.220

SAMPLING FOR MODEL '5a4b0bbe706bdf184081544faef93837' NOW (CHAIN 1).
Chain 1: 
Chain 1: Gradient evaluation took 3.7e-05 seconds
Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.37 seconds.
Chain 1: Adjust your expectations accordingly!
Chain 1: 
Chain 1: 
Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 1: 
Chain 1:  Elapsed Time: 0.193857 seconds (Warm-up)
Chain 1:                0.04078 seconds (Sampling)
Chain 1:                0.234637 seconds (Total)
Chain 1: 

SAMPLING FOR MODEL '5a4b0bbe706bdf184081544faef93837' NOW (CHAIN 2).
Chain 2: 
Chain 2: Gradient evaluation took 3.9e-05 seconds
Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.39 seconds.
Chain 2: Adjust your expectations accordingly!
Chain 2: 
Chain 2: 
Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 2: 
Chain 2:  Elapsed Time: 0.164548 seconds (Warm-up)
Chain 2:                0.04436 seconds (Sampling)
Chain 2:                0.208908 seconds (Total)
Chain 2: 

SAMPLING FOR MODEL '5a4b0bbe706bdf184081544faef93837' NOW (CHAIN 3).
Chain 3: 
Chain 3: Gradient evaluation took 7.1e-05 seconds
Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.71 seconds.
Chain 3: Adjust your expectations accordingly!
Chain 3: 
Chain 3: 
Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 3: 
Chain 3:  Elapsed Time: 0.188727 seconds (Warm-up)
Chain 3:                0.050933 seconds (Sampling)
Chain 3:                0.23966 seconds (Total)
Chain 3: 

SAMPLING FOR MODEL '5a4b0bbe706bdf184081544faef93837' NOW (CHAIN 4).
Chain 4: 
Chain 4: Gradient evaluation took 4.2e-05 seconds
Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.42 seconds.
Chain 4: Adjust your expectations accordingly!
Chain 4: 
Chain 4: 
Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 4: 
Chain 4:  Elapsed Time: 0.173898 seconds (Warm-up)
Chain 4:                0.043751 seconds (Sampling)
Chain 4:                0.217649 seconds (Total)
Chain 4: 
# Refit b7.8 with the water-by-shade interaction added; update() reuses the
# compiled Stan model and the priors from b7.8.
b7.9 <- 
  update(b7.8, 
         formula = blooms ~ 1 + water_c + shade_c + water_c:shade_c)
Start sampling

SAMPLING FOR MODEL '5a4b0bbe706bdf184081544faef93837' NOW (CHAIN 1).
Chain 1: 
Chain 1: Gradient evaluation took 3.8e-05 seconds
Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.38 seconds.
Chain 1: Adjust your expectations accordingly!
Chain 1: 
Chain 1: 
Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 1: 
Chain 1:  Elapsed Time: 0.231247 seconds (Warm-up)
Chain 1:                0.054888 seconds (Sampling)
Chain 1:                0.286135 seconds (Total)
Chain 1: 

SAMPLING FOR MODEL '5a4b0bbe706bdf184081544faef93837' NOW (CHAIN 2).
Chain 2: 
Chain 2: Gradient evaluation took 1.3e-05 seconds
Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds.
Chain 2: Adjust your expectations accordingly!
Chain 2: 
Chain 2: 
Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 2: 
Chain 2:  Elapsed Time: 0.216782 seconds (Warm-up)
Chain 2:                0.05399 seconds (Sampling)
Chain 2:                0.270772 seconds (Total)
Chain 2: 

SAMPLING FOR MODEL '5a4b0bbe706bdf184081544faef93837' NOW (CHAIN 3).
Chain 3: 
Chain 3: Gradient evaluation took 1.3e-05 seconds
Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds.
Chain 3: Adjust your expectations accordingly!
Chain 3: 
Chain 3: 
Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 3: 
Chain 3:  Elapsed Time: 0.169286 seconds (Warm-up)
Chain 3:                0.053028 seconds (Sampling)
Chain 3:                0.222314 seconds (Total)
Chain 3: 

SAMPLING FOR MODEL '5a4b0bbe706bdf184081544faef93837' NOW (CHAIN 4).
Chain 4: 
Chain 4: Gradient evaluation took 1.3e-05 seconds
Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds.
Chain 4: Adjust your expectations accordingly!
Chain 4: 
Chain 4: 
Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 4: 
Chain 4:  Elapsed Time: 0.213567 seconds (Warm-up)
Chain 4:                0.053374 seconds (Sampling)
Chain 4:                0.266941 seconds (Total)
Chain 4: 
# Trace and density plots for the interaction model.
plot(b7.9)

# Coefficient plot: half-eye summaries of each population-level parameter
# of b7.9. geom_halfeyeh() comes from tidybayes — presumably attached
# earlier in the full notebook; confirm it is loaded.
posterior_samples(b7.9) %>% 
  select(starts_with("b_")) %>% 
  gather(Variable, Value) %>% 
  ggplot(aes(y = fct_rev(Variable), x = Value)) +
    # Interval widths ordered (50%, 95%) for consistency with the earlier
    # half-eye plot; dead commented-out theme line removed.
    geom_halfeyeh(.width = c(.5, .95), fill = palette[6]) +
    theme(text = element_text(family = "Courier", size = 12, colour = palette[7]),
          panel.background = element_rect(fill = alpha(palette[1], 1/4)),
          legend.position = c(0, 1),
          legend.justification = c(0, 1),
          legend.background = element_rect(fill = "transparent"),
          legend.title = element_blank(),
          strip.background = element_rect(fill=palette[2])) +
    scale_fill_manual(values = c(palette[3], palette[6])) +
    scale_colour_manual(values = c(palette[3], palette[6])) +
    scale_x_continuous(breaks = seq(from=-50, to=150, length.out = 5)) +
    ylab("Variable") + xlab("Value") + ggtitle("Coefplot")

NA

Here is how we interpret each of these estimates:

  1. b_Intercept is the expected value of blooms when both water and shade are at their average values.
  2. b_shade_c is the expected change in blooms when shade increases by one unit and water is at its average value.
  3. b_water_c is the expected change in blooms when water increases by one unit and shade is at its average value.
  4. b_water_c:shade_c is the interaction effect. It tells us the expected change in the influence of water on blooms when increasing shade by one unit. It also tells us the expected change in the influence of shade on blooms when increasing water by one unit.
# Fitted values for the main-effects model (b7.8) and the interaction model
# (b7.9), stacked and plotted in a water-by-model facet grid over the data.
fitted(b7.8) %>%
  as_tibble() %>%
  # append fitted() for model b7.9
  bind_rows(
    fitted(b7.9) %>% 
      as_tibble()
  ) %>% 
  # index the models (27 fitted rows each)
  mutate(fit  = rep(c("b7.8", "b7.9"), each = 27)) %>% 
  # add the data, `d`, once per model
  bind_cols(bind_rows(d, d)) %>% 
  # facet labels for `ggplot2::facet_grid()`
  mutate(x_grid = paste("water =", water),
         y_grid = paste("model: ", fit)) %>% 
  
  ggplot(aes(x = shade)) +
  geom_ribbon(aes(ymin = Q2.5,
                  ymax = Q97.5), 
              fill = palette[3], alpha = 1/5) +
  geom_line(aes(y = Estimate), 
            color = palette[6]) +
  geom_point(aes(y = blooms, group = x_grid), 
             shape = 1, color = palette[7]) +
  coord_cartesian(xlim = range(d$shade), 
                  ylim = range(d$blooms)) +
  # FIX: the original declared scale_x_continuous() twice, triggering the
  # "Scale for 'x' is already present" warning; only the labelled scale is
  # kept. NOTE(review): x here is the raw shade (1-3), so breaks c(-1, 0, 1)
  # fall outside the plotted range — confirm whether shade_c was meant.
  scale_x_continuous("Shade (centered)", breaks = c(-1, 0, 1)) +
  ylab("Blooms") +
  facet_grid(y_grid ~ x_grid) +
  theme(text = element_text(family = "Courier", size = 12, colour = palette[7]),
          panel.background = element_rect(fill = alpha(palette[1], 1/4)),
          legend.position = c(0, 1),
          legend.justification = c(0, 1),
          legend.background = element_rect(fill = "transparent"),
          legend.title = element_blank(),
          strip.background = element_rect(fill=palette[2])) +
    scale_fill_manual(values = c(palette[3], palette[6])) +
    scale_colour_manual(values = c(palette[3], palette[6]))
Scale for 'x' is already present. Adding another scale for 'x', which will replace the
existing scale.

Homework

7H1

library(recipes)
library(rethinking)
rethinking (Version 1.59)

Attaching package: ‘rethinking’

The following objects are masked from ‘package:brms’:

    LOO, stancode, WAIC

The following object is masked from ‘package:testthat’:

    compare

The following object is masked from ‘package:purrr’:

    map
data(tulips)
# `as_data_frame()` is deprecated; `as_tibble()` is the current replacement.
d <- tulips %>% 
  as_tibble()
detach(package:rethinking, unload = TRUE)

# Design matrix: center water/shade and one-hot encode bed (-> bed_a/b/c).
# NOTE(review): `dm` is never used below — the model is fit on the raw `d`,
# so water and shade enter uncentered. Confirm whether the fit was meant to
# use `dm` instead.
dm <- recipe(blooms ~ 0 + bed + water + shade,
       data = d) %>%
  step_center(water, shade) %>% 
  step_dummy(bed, one_hot = TRUE) %>%
  prep(d) %>% 
  bake(d)

# 7H1: blooms with a separate intercept per bed (`0 + bed`), plus water,
# shade, and their interaction; adapt_delta raised per Stan's advice.
b7h.1 <- brm(blooms ~ 0 + bed + water + shade + water:shade,
                data = d, family = gaussian,
                prior = c(prior(normal(0, 100), class = b),
                          prior(cauchy(0, 10), class = sigma)),
                iter = 2000, warmup = 1000, chains = 4, cores = 4,
                control = list(adapt_delta = 0.9))
Compiling the C++ model
Start sampling
starting worker pid=75205 on localhost:11649 at 22:05:29.773
starting worker pid=75215 on localhost:11649 at 22:05:30.000
starting worker pid=75225 on localhost:11649 at 22:05:30.224
starting worker pid=75235 on localhost:11649 at 22:05:30.449

SAMPLING FOR MODEL 'ea6f8b0f171e7cb672fd8882823f99c8' NOW (CHAIN 1).
Chain 1: 
Chain 1: Gradient evaluation took 3.9e-05 seconds
Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.39 seconds.
Chain 1: Adjust your expectations accordingly!
Chain 1: 
Chain 1: 
Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 1: 
Chain 1:  Elapsed Time: 0.433408 seconds (Warm-up)
Chain 1:                0.270968 seconds (Sampling)
Chain 1:                0.704376 seconds (Total)
Chain 1: 

SAMPLING FOR MODEL 'ea6f8b0f171e7cb672fd8882823f99c8' NOW (CHAIN 2).
Chain 2: 
Chain 2: Gradient evaluation took 4e-05 seconds
Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.4 seconds.
Chain 2: Adjust your expectations accordingly!
Chain 2: 
Chain 2: 
Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 2: 
Chain 2:  Elapsed Time: 0.416783 seconds (Warm-up)
Chain 2:                0.197261 seconds (Sampling)
Chain 2:                0.614044 seconds (Total)
Chain 2: 

SAMPLING FOR MODEL 'ea6f8b0f171e7cb672fd8882823f99c8' NOW (CHAIN 3).
Chain 3: 
Chain 3: Gradient evaluation took 3.7e-05 seconds
Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.37 seconds.
Chain 3: Adjust your expectations accordingly!
Chain 3: 
Chain 3: 
Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 3: 
Chain 3:  Elapsed Time: 0.462946 seconds (Warm-up)
Chain 3:                0.196096 seconds (Sampling)
Chain 3:                0.659042 seconds (Total)
Chain 3: 

SAMPLING FOR MODEL 'ea6f8b0f171e7cb672fd8882823f99c8' NOW (CHAIN 4).
Chain 4: 
Chain 4: Gradient evaluation took 3.8e-05 seconds
Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.38 seconds.
Chain 4: Adjust your expectations accordingly!
Chain 4: 
Chain 4: 
Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 4: 
Chain 4:  Elapsed Time: 0.431762 seconds (Warm-up)
Chain 4:                0.199138 seconds (Sampling)
Chain 4:                0.6309 seconds (Total)
Chain 4: 
# Trace/density plots, then the fitted water-by-shade interaction over the
# data. `points = TRUE` replaces `T`, which is reassignable and unsafe.
# NOTE(review): `marginal_effects()` was renamed `conditional_effects()` in
# brms >= 2.11 — confirm the targeted brms version.
plot(b7h.1)

plot(marginal_effects(b7h.1,
                      effects = "water:shade"),
     points = TRUE)

---
title: "Chapter 7 - Interactions"
output: html_notebook
---

```{r setup}
# Global chunk options: compact default figure size.
knitr::opts_chunk$set(fig.width=5, fig.height=2.5) 
# rcartocolor supplies the CARTO palettes; brms fits the Bayesian models.
library(rcartocolor)
library(tidyverse)
library(brms)

# Seven-colour "BurgYl" palette used throughout.
# NOTE(review): the name `palette` masks base::palette() for the session.
colour_theme <- "BurgYl"
palette <- carto_pal(7, colour_theme)
```

# Notes

To model deeper conditionality we need interactions. An interaction is a kind of conditioning, a way of allowing parameters (really their posterior distributions) to be conditional on further aspects of the data. The simplest kind of interaction, a linear interaction, is built by extending the linear modeling strategy to parameters within the linear model. Multilevel models induce similar effects. Common sorts of multilevel models are essentially massive interaction models, in which estimates (intercepts and slopes) are conditional on clusters (person, genus, village, city, galaxy) in the data.

## Building an Interaction
We'll start by looking at the relationship between ruggedness and GDP. This relationship appears to be different in the continent of Africa.
```{r}
# Pull the `rugged` data out of rethinking, then detach the package so its
# functions don't mask brms/tidyverse ones.
library(rethinking)
data(rugged)
detach(package:rethinking, unload = TRUE)

# Log GDP (outcome) plus a readable continent label for faceting.
d <- as_tibble(rugged) %>% 
  mutate(log_gdp = log(rgdppc_2000),
         africa = ifelse(cont_africa == 1, "Africa", "Not Africa"))

# Per-continent OLS fits: the ruggedness/GDP slope looks different in Africa.
d %>% 
  ggplot(aes(x = rugged, y = log_gdp)) +
  geom_smooth(method = "lm", colour = palette[6], fill = palette[2], na.rm = TRUE) + 
  geom_point(colour = palette[6], na.rm = TRUE) + 
  facet_wrap(~ africa) +
  xlab("Ruggedness Index") + ylab("Log GDP") + 
  theme(text = element_text(family = "Courier", size = 12, colour = palette[7]),
        panel.background = element_rect(fill = alpha(palette[1], 1/4)),
        panel.grid = element_blank(),
        strip.background = element_rect(fill = palette[2])) +
  scale_colour_manual(values = palette[5:7]) +
  coord_cartesian(xlim = c(0, 5))
```

We can model this relationship directly with an interaction effect, and it's better to do so in this way rather than splitting the data, for several reasons: 

1. Parameters such as $\sigma$ can learn across the whole sample. Splitting the data and training two regressions assumes a different variance between the two groups.
2. You can acquire probability statements about the variable you split on.
3. You can compare models more easily.
4. Eventually, we can borrow data across categories when fitting multilevel models.

```{r}
# Interaction model: the ruggedness slope on log-GDP is allowed to differ by
# continent via the rugged:cont_africa term.
dd <- d %>%
  mutate(cont_africa = as.factor(cont_africa))

fit7.1 <- brm(log_gdp ~ 1 + rugged + cont_africa + rugged:cont_africa,
              data = dd, family = gaussian,
              prior = c(prior(normal(8, 100), class = Intercept),
                        prior(normal(0, 1), class = b),
                        # A uniform prior needs its bound declared via `ub`,
                        # otherwise Stan can propose values outside the support.
                        prior(uniform(0, 10), class = sigma, ub = 10)),
              iter = 2000, warmup = 1000, chains = 4, cores = 4)
plot(fit7.1)
```
```{r}
# Print the posterior summary table for the interaction model.
fit7.1
```

This is the easy way to examine the effect of the interaction.
```{r eval=FALSE}
# Built-in interaction plot (shown but not run: chunk has eval=FALSE).
# NOTE(review): marginal_effects() was renamed conditional_effects() in
# brms 2.12 — confirm the installed brms version before relying on this.
marginal_effects_plot <- plot(marginal_effects(fit7.1, effects = "rugged:cont_africa"), 
                              points = TRUE)
```

We'll do it manually instead for instructive purposes.
```{r}
# Manually rebuild the interaction plot: predict over a ruggedness grid for
# both continent groups, then overlay the observed data.
nd <- tibble(rugged = rep(seq(from=0, to=8, length.out=100), 2),
             cont_africa = as.factor(c(rep(1, 100), rep(0, 100))))

fitted(fit7.1, newdata = nd) %>% 
  as_tibble() %>% 
  bind_cols(nd) %>% 
  mutate(
    cont_africa = ifelse(cont_africa == 1, "Africa", "Not Africa")) %>% 
  ggplot(data = ., aes(x = rugged, y = Estimate, 
                       ymin = Q2.5, ymax = Q97.5,
                       colour = cont_africa, fill = cont_africa)) +
    geom_ribbon(alpha = I(1/3), colour = "transparent") + 
    geom_line() +
    geom_jitter(data = dd, inherit.aes = FALSE, fill = palette[7],
                aes(x = rugged, y = log_gdp), alpha = I(2/3), na.rm=TRUE) +
    # Fixed labels: "Rugedness" typo, and the y axis shows log(GDP), not GDP.
    xlab("Ruggedness Index") + ylab("Log GDP") +
    theme(text = element_text(family = "Courier", size = 12, colour = palette[7]),
          panel.background = element_rect(fill = alpha(palette[1], 1/4)),
          panel.grid = element_blank(),
          legend.position = c(0, 1),
          legend.justification = c(0, 1),
          legend.background = element_rect(fill = "transparent"),
          legend.title = element_blank(),
          strip.background = element_rect(fill=palette[2])) +
    scale_fill_manual(values = c(palette[3], palette[6])) +
    scale_colour_manual(values = c(palette[3], palette[6]))
```


```{r}
# Posterior slopes of ruggedness by continent. With `cont_africa` coded as a
# factor whose reference level is 0 (= not Africa), `b_rugged` is the slope
# OUTSIDE Africa; adding the interaction term gives the slope IN Africa.
# (The original draft had these two labels swapped, which also flipped the
# sign of `difference` relative to its axis label.)
post <- posterior_samples(fit7.1) %>% 
  mutate(ruggedness_in_africa = b_rugged + `b_rugged:cont_africa1`,
         ruggedness_outside_africa = b_rugged,
         difference = ruggedness_in_africa - ruggedness_outside_africa)

# Half-eye of the slope difference (in Africa minus outside Africa).
p1 <- post %>% 
  ggplot(aes(difference, 0)) +
  geom_halfeyeh(.width = c(.5, .95),
                fill = palette[6],
                colour = palette[6]) +
  geom_vline(xintercept = 0, linetype = 2, colour = palette[7]) +
  xlab("Difference in the effect of ruggedness on log(GDP) : In Africa vs Out Of Africa") +
  ylab("") +
  theme(text = element_text(family = "Courier", size = 10, colour = palette[7]),
          panel.background = element_rect(fill = alpha(palette[1], 1/4)),
          panel.grid = element_blank()) +
    scale_colour_manual(values = palette[5:7])
  
# Overlaid densities of the two slopes themselves.
p2 <- post %>% 
  transmute(`In Africa` = ruggedness_in_africa, 
            `Outside Africa` = ruggedness_outside_africa) %>% 
  gather(Variable, Value) %>% 
  ggplot(aes(Value, fill = Variable)) +
    geom_density(alpha = I(.8), colour = "transparent") +
    xlab("The effect of ruggedness on GDP") +
    theme(text = element_text(family = "Courier", size = 10, colour = palette[7]),
          panel.background = element_rect(fill = alpha(palette[1], 1/4)),
          panel.grid = element_blank(),
          legend.position = c(1, 1),
          legend.justification = c(1, 1),
          legend.background = element_rect(fill = "transparent"),
          legend.title = element_blank()) +
    scale_fill_manual(values = c(palette[3], palette[6]))

gridExtra::grid.arrange(p2, p1)
```

## Symmetry of the linear interaction
The interaction here has two equally valid phrasings:

1. How much does the influence of ruggedness (on GDP) depend upon whether the nation is in Africa?
2. How much does the influence of being in Africa (on GDP) depend upon ruggedness?

## Continuous Interactions

In this section, we'll build an interaction between two continuous predictors, water and shade, using the `tulips` data.
```{r message=FALSE, warning=FALSE}
# Load the tulips data, then detach rethinking so it doesn't mask brms.
library(rethinking)
data(tulips)
d <- as_tibble(tulips)
detach(package:rethinking, unload = TRUE)

# Quick variable summary.
d %>% skimr::skim()
```

```{r}
# These first two have some issues
# Deliberately kept as-is for illustration: the flat uniform(0, 100) prior on
# sigma (with no declared bounds) is what prompts Stan's sampler warnings that
# the follow-up fits below address.
m7.6 <- brm(blooms ~ 1 + water + shade,
             data = d, family = gaussian,
             prior = c(prior(normal(0, 100), class = "Intercept"),
                       prior(normal(0, 100), class = "b"),
                       prior(uniform(0, 100), class = "sigma")),
             iter = 2000, warmup = 1000, chains = 4, cores = 4)

# Same priors, now with the water:shade interaction added.
m7.7 <- brm(blooms ~ 1 + water + shade + water:shade,
             data = d, family = gaussian,
             prior = c(prior(normal(0, 100), class = "Intercept"),
                       prior(normal(0, 100), class = "b"),
                       prior(uniform(0, 100), class = "sigma")),
             iter = 2000, warmup = 1000, chains = 4, cores = 4)

# So now we use better priors and listen to STAN's 
# advice of changing adapt_delta
# b7.6 refits m7.6 with a half-Cauchy prior on sigma and adapt_delta = 0.9.
b7.6 <- update(m7.6,
               prior = c(prior(normal(0, 100), class = Intercept),
                         prior(normal(0, 100), class = b),
                         prior(cauchy(0, 10), class = sigma)),
               control = list(adapt_delta = 0.9))

# b7.7 refits b7.6 with the interaction; update() re-uses priors and control.
b7.7 <- update(b7.6,
               formula = blooms ~ 1 + water + shade + water:shade)


# Compare the two models by approximate leave-one-out cross-validation.
loo(b7.6, b7.7)
```

Let's look at the posterior summary of the interaction model.
```{r}
# Posterior means, SEs and 95% intervals for the interaction model, rounded.
posterior_summary(b7.7) %>% round(digits = 2)
```

Almost all the weight went to the interaction model, b7.7.
```{r}
# WAIC-based model weights: main-effects model vs. interaction model.
model_weights(b7.6, b7.7, weights = "waic")

```

Now let's try centering and scaling our predictors before fitting.
```{r fig.height=7}
# Standardise both predictors (mean 0, sd 1) before refitting.
standardise <- function(x) (x - mean(x)) / sd(x)

dd <- d %>%
  mutate(shade_c = standardise(shade),
         water_c = standardise(water))

# Main-effects-only model on the standardised predictors.
b7.8 <- brm(blooms ~ 1 + water_c + shade_c,
            data = dd, family = gaussian,
            prior = c(prior(normal(130, 100), class = Intercept),
                      prior(normal(0, 100), class = b),
                      prior(cauchy(0, 10), class = sigma)),
            iter = 2000, warmup = 1000, chains = 4, cores = 4,
            control = list(adapt_delta = 0.9))

# The same model with the water_c:shade_c interaction added; update() keeps
# the priors and sampler settings from b7.8.
b7.9 <- update(b7.8,
               formula = blooms ~ 1 + water_c + shade_c + water_c:shade_c)

plot(b7.9)
```

```{r}
# Half-eye coefficient plot of every population-level parameter in b7.9.
coef_draws <- posterior_samples(b7.9) %>% 
  select(starts_with("b_")) %>% 
  gather(Variable, Value)

coef_draws %>% 
  ggplot(aes(y = fct_rev(Variable), x = Value)) +
  geom_halfeyeh(.width = c(.95, .5), fill = palette[6]) +
  theme(text = element_text(family = "Courier", size = 12, colour = palette[7]),
        panel.background = element_rect(fill = alpha(palette[1], 1/4)),
        legend.position = c(0, 1),
        legend.justification = c(0, 1),
        legend.background = element_rect(fill = "transparent"),
        legend.title = element_blank(),
        strip.background = element_rect(fill = palette[2])) +
  scale_fill_manual(values = c(palette[3], palette[6])) +
  scale_colour_manual(values = c(palette[3], palette[6])) +
  scale_x_continuous(breaks = seq(from = -50, to = 150, length.out = 5)) +
  labs(x = "Value", y = "Variable", title = "Coefplot")
      
```

Here is how we interpret each of these estimates:

1. `b_Intercept` is the expected value of blooms when both water and shade are at their average values.
2. `b_shade_c` is the expected change in blooms when shade increases by one unit and water is at its average value.
3. `b_water_c` is the expected change in blooms when water increases by one unit and shade is at its average value.
4. `b_water_c:shade_c` is the interaction effect. It tells us the expected change in the influence of water on blooms when increasing shade by one unit. It also tells us the expected change in the influence of shade on blooms when increasing water by one unit.

```{r}
# Compare fitted trajectories of b7.8 (no interaction) and b7.9 (interaction)
# across the three water levels, with the observed blooms overlaid.
# fitted() for model b7.8
fitted(b7.8) %>%
  as_tibble() %>%
  # adding fitted() for model b7.9
  bind_rows(
    fitted(b7.9) %>% 
      as_tibble()
  ) %>% 
  # We'll want to index the models; `nrow(d)` instead of a hard-coded 27 so
  # this keeps working if the data change.
  mutate(fit  = rep(c("b7.8", "b7.9"), each = nrow(d))) %>% 
  # Here we add the data, `d`
  bind_cols(bind_rows(d, d)) %>% 
  # These will come in handy for `ggplot2::facet_grid()`
  mutate(x_grid = paste("water =", water),
         y_grid = paste("model: ", fit)) %>% 
  
  ggplot(aes(x = shade)) +
  geom_ribbon(aes(ymin = Q2.5,
                  ymax = Q97.5), 
              fill = palette[3], alpha = 1/5) +
  geom_line(aes(y = Estimate), 
            color = palette[6]) +
  geom_point(aes(y = blooms, group = x_grid), 
             shape = 1, color = palette[7]) +
  coord_cartesian(xlim = range(d$shade), 
                  ylim = range(d$blooms)) +
  # `d` holds the raw (uncentered) shade values 1-3, so label and break the
  # axis accordingly (the old breaks c(-1, 0, 1) fell outside the plot range).
  scale_x_continuous("Shade", breaks = 1:3) +
  ylab("Blooms") +
  facet_grid(y_grid ~ x_grid) +
  theme(text = element_text(family = "Courier", size = 12, colour = palette[7]),
          panel.background = element_rect(fill = alpha(palette[1], 1/4)),
          legend.position = c(0, 1),
          legend.justification = c(0, 1),
          legend.background = element_rect(fill = "transparent"),
          legend.title = element_blank(),
          strip.background = element_rect(fill=palette[2])) +
    scale_fill_manual(values = c(palette[3], palette[6])) +
    scale_colour_manual(values = c(palette[3], palette[6]))
```

# Homework

## 7H1
```{r}
library(recipes)
library(rethinking)
data(tulips)
d <- tulips %>% 
  as_tibble()
detach(package:rethinking, unload = TRUE)

# Preprocess with recipes: centre the continuous predictors. `bed` stays a
# factor — brm() builds the dummy coding itself from `0 + bed`, so the old
# step_dummy(one_hot = TRUE) step was unnecessary (and its renamed columns
# made the baked data unusable with the formula below, which is why the
# original draft ended up fitting on the raw `d` and never using `dm`).
dm <- recipe(blooms ~ 0 + bed + water + shade,
       data = d) %>%
  step_center(water, shade) %>% 
  prep(d) %>% 
  bake(d)

# So now we use better priors and listen to STAN's 
# advice of changing adapt_delta
b7h.1 <- brm(blooms ~ 0 + bed + water + shade + water:shade,
                data = dm, family = gaussian,
                prior = c(prior(normal(0, 100), class = b),
                          prior(cauchy(0, 10), class = sigma)),
                iter = 2000, warmup = 1000, chains = 4, cores = 4,
                control = list(adapt_delta = 0.9))

plot(b7h.1)
```

```{r}
# Marginal effect of the water:shade interaction, with raw data points shown.
# NOTE(review): marginal_effects() was renamed conditional_effects() in
# brms 2.12; also prefer `points = TRUE` over the reassignable `T`.
plot(marginal_effects(b7h.1,
                      effects = "water:shade"),
     points = T)
```




